by Misa Ogura
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torchvision.models as models
from flashtorch.utils import load_image
# Load the sample owl photo and display it untouched, as a reference point
# for the saliency maps computed below.
image = load_image('../examples/images/great_grey_owl_01.jpg')
ax = plt.gca()
ax.imshow(image)
ax.set_title('Original image')
ax.axis('off');
from flashtorch.utils import apply_transforms
# Convert the PIL image into a model-ready tensor; per the echoed output below,
# the result is a normalized 4-D NCHW tensor of shape [1, 3, 224, 224].
input_ = apply_transforms(image)
# Print types before/after to confirm the PIL.Image -> torch.Tensor conversion.
print(f'Before: {type(image)}')
print(f'After: {type(input_)}, {input_.shape}')
Before: <class 'PIL.Image.Image'>
After: <class 'torch.Tensor'>, torch.Size([1, 3, 224, 224])
# plt.imshow(input_)
# plt.title('Input tensor')
# plt.axis('off');
RuntimeError: Can't call numpy() on Variable that requires grad. Use var.detach().numpy() instead.
from flashtorch.utils import format_for_plotting
# Rearrange the input tensor into a layout matplotlib can render
# (this avoids the grad-tensor error shown above).
displayable = format_for_plotting(input_)
ax = plt.gca()
ax.imshow(displayable)
ax.set_title('Input tensor')
ax.axis('off');
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
from flashtorch.utils import denormalize
# Undo the normalization applied by apply_transforms before plotting, so the
# image displays with natural colours (and without the clipping warning above).
plt.imshow(format_for_plotting(denormalize(input_)))
plt.title('Input tensor')
plt.axis('off');
from flashtorch.saliency import Backprop
# Use an ImageNet-pretrained AlexNet as the model to explain.
# NOTE(review): `pretrained=True` is deprecated in newer torchvision releases
# in favour of `weights=...` — fine if the pinned torchvision predates that;
# confirm against the project's requirements.
model = models.alexnet(pretrained=True)
# Backprop computes input-gradient saliency maps for the wrapped model.
backprop = Backprop(model)
Signature:
backprop.calculate_gradients(input_, target_class=None, take_max=False)
from flashtorch.utils import ImageNetIndex
# Map a human-readable class name to its ImageNet class index
# ('great grey owl' -> 24, per the echoed output below).
imagenet = ImageNetIndex()
target_class = imagenet['great grey owl']
print(target_class)
24
# imagenet['dog']
ValueError: Multiple potential matches found: maltese dog, old english sheepdog, shetland sheepdog, greater swiss mountain dog, bernese mountain dog, french bulldog, eskimo dog, african hunting dog, dogsled, hotdog
# Gradient of the target-class score w.r.t. the input pixels; per the echoed
# output below, the result is a [3, 224, 224] tensor (one map per channel).
gradients = backprop.calculate_gradients(input_, target_class)
print(type(gradients), gradients.shape)
<class 'torch.Tensor'> torch.Size([3, 224, 224])
# take_max=True collapses the channel dimension to the per-pixel maximum,
# yielding a single [1, 224, 224] saliency map (see echoed output below).
max_gradients = backprop.calculate_gradients(input_, target_class, take_max=True)
print(type(max_gradients), max_gradients.shape)
<class 'torch.Tensor'> torch.Size([1, 224, 224])
from flashtorch.utils import visualize
# Render the input alongside the per-channel and max-pooled saliency maps.
visualize(input_, gradients, max_gradients)
The map shows that pixels in the region where the animal appears had the strongest positive effect on the prediction.
However, the result is quite noisy...
# guided=True switches to guided backpropagation, which suppresses negative
# gradient flow at ReLUs and produces a much cleaner saliency map.
guided_gradients = backprop.calculate_gradients(input_, target_class, guided=True)
max_guided_gradients = backprop.calculate_gradients(input_, target_class, take_max=True, guided=True)
visualize(input_, guided_gradients, max_guided_gradients)
Now that's much less noisy!
We can clearly see that pixels around the head and eyes had the strongest positive effect on the prediction.
# NOTE(review): the two calls below repeat the visualization above verbatim —
# they look like leftover re-runs of the same notebook cell and produce
# identical figures; consider removing them.
visualize(input_, guided_gradients, max_guided_gradients)
visualize(input_, guided_gradients, max_guided_gradients)
# Repeat the guided-backprop workflow on a flower image with a model that has
# NOT been fine-tuned on flowers — hence the "predicted class does not equal
# the target class" warning echoed below.
image = load_image('../examples/images/foxglove.jpg')
input_ = apply_transforms(image)
class_index = 96 # foxglove
# NOTE(review): `create_model` is neither imported nor defined in this file —
# presumably it comes from an earlier notebook cell; confirm before running,
# otherwise this line raises NameError.
pretrained_model = create_model()
backprop = Backprop(pretrained_model)
guided_gradients = backprop.calculate_gradients(input_, class_index, guided=True)
guided_max_gradients = backprop.calculate_gradients(input_, class_index, take_max=True, guided=True)
visualize(input_, guided_gradients, guided_max_gradients)
/Users/misao/Projects/personal/flashtorch/flashtorch/saliency/backprop.py:93: UserWarning: The predicted class does not equal the
target class. Calculating the gradient with respect to the
predicted class.
predicted class.'''))
# Same workflow, but with weights fine-tuned for flower classification loaded
# from disk — the saliency maps should now focus on class-relevant regions.
# (Relies on the same externally defined `create_model` as the cell above.)
trained_model = create_model('../models/flower_classification_transfer_learning.pt')
backprop = Backprop(trained_model)
guided_gradients = backprop.calculate_gradients(input_, class_index, guided=True)
guided_max_gradients = backprop.calculate_gradients(input_, class_index, take_max=True, guided=True)
visualize(input_, guided_gradients, guided_max_gradients)